In [1]:
import tensorflow as tf
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ['CUDA_VISIBLE_DEVICES'] = '0'  # -1  to USE CPU
from keras.layers import Conv2D, Input, ZeroPadding2D, BatchNormalization, Activation, MaxPooling2D, Flatten, Dense,Dropout
from keras.models import Model, load_model
from keras.callbacks import TensorBoard, ModelCheckpoint, ReduceLROnPlateau
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.utils import shuffle
import cv2
import imutils
import numpy as np
import matplotlib.pyplot as plt
import time
from os import listdir

get_ipython().magic(u'matplotlib inline')
In [3]:
def crop_brain_contour(image, plot=False):

    # Convert the image to grayscale, and blur it slightly
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)

    # Threshold the image, then perform a series of erosions +
    # dilations to remove any small regions of noise
    thresh = cv2.threshold(gray, 45, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.erode(thresh, None, iterations=2)
    thresh = cv2.dilate(thresh, None, iterations=2)

    # Find contours in thresholded image, then grab the largest one
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)

    # Find the extreme points
    extLeft = tuple(c[c[:, :, 0].argmin()][0])
    extRight = tuple(c[c[:, :, 0].argmax()][0])
    extTop = tuple(c[c[:, :, 1].argmin()][0])
    extBot = tuple(c[c[:, :, 1].argmax()][0])

    # crop new image out of the original image using the four extreme points (left, right, top, bottom)
    new_image = image[extTop[1]:extBot[1], extLeft[0]:extRight[0]]

    if plot:
        plt.figure()

        plt.subplot(1, 2, 1)
        plt.imshow(image)

        plt.tick_params(axis='both', which='both',
                        top=False, bottom=False, left=False, right=False,
                        labelbottom=False, labeltop=False, labelleft=False, labelright=False)
        plt.title('Original Image')
        plt.subplot(1, 2, 2)
        plt.imshow(new_image)
        plt.tick_params(axis='both', which='both',
                        top=False, bottom=False, left=False, right=False,
                        labelbottom=False, labeltop=False, labelleft=False, labelright=False)
        plt.title('Cropped Image')
        plt.show()
    return new_image
In [4]:
def canny_convert(image):
    # image = cv2.imread(path)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    gradients_sobelx = cv2.Sobel(image, -1, 1, 0)
    gradients_sobely = cv2.Sobel(image, -1, 0, 1)
    gradients_sobelxy = cv2.addWeighted(gradients_sobelx, 0.5, gradients_sobely, 0.5, 0)
    gradients_laplacian = cv2.Laplacian(image, -1)
    canny_output = cv2.Canny(image, 80, 120)

    # cv2_imshow(gradients_sobelx)
    # cv2_imshow(gradients_sobely)
    # cv2_imshow(gradients_sobelxy)
    # cv2_imshow(gradients_laplacian)

    return canny_output
In [5]:
ex_img = cv2.imread('yes/Y1.jpg')
# ex_img = canny_convert('yes/Y1.jpg')
# print(ex_img)
# print(ex_img.shape)
# cv2_imshow(ex_img)
# ex_img = cv2.cvtColor(ex_img,cv2.COLOR_GRAY2RGB)
# print(backtorgb.shape)
# cv2_imshow(ex_img)
# print(ex_img.shape)
ex_new_img = crop_brain_contour(ex_img, True)
In [6]:
def load_data(dir_list, image_size):
    """
    Read images, resize and normalize them. 
    Arguments:
        dir_list: list of strings representing file directories.
    Returns:
        X: A numpy array with shape = (#_examples, image_width, image_height, #_channels)
        y: A numpy array with shape = (#_examples, 1)
    """
    # load all images in a directory
    X = [] 
    p = []
    y = []
    q = []
    image_width, image_height = image_size
    for directory in dir_list:
        for filename in listdir(directory):
            print(filename)
            print(directory)
            # load the image
            image = cv2.imread(directory + '/' + filename)
            # print(image)
            # crop the brain and ignore the unnecessary rest part of the image
            image = crop_brain_contour(image, plot=False)
            image_ = canny_convert(image)
            # resize image
            image = cv2.resize(image, dsize=(image_width, image_height), interpolation=cv2.INTER_CUBIC)
            image_ = cv2.resize(image_, dsize=(image_width, image_height), interpolation=cv2.INTER_CUBIC)
            # normalize values
            image = image / 255.
            image_ = image_ / 255.
            # convert image to numpy array and append it to X
            X.append(image)
            p.append(image_)
            # append a value of 1 to the target array if the image
            # is in the folder named 'yes', otherwise append 0.
            if directory[-3:] == 'yes':
                y.append([1])
                q.append([1])
            else:
                y.append([0])
                q.append([0])
    X = np.array(X)
    p = np.array(p)
    y = np.array(y)
    q = np.array(q)
    # Shuffle the data
    X, y = shuffle(X, y)
    p, q = shuffle(p, q)
    print('Number of examples is:{}'.format(len(X)))
    return X,y, p, q
In [7]:
augmented_path = ''
# augmented data (yes and no) contains both the original and the new generated examples
augmented_yes = augmented_path + 'yes'
augmented_no = augmented_path + 'no'
IMG_WIDTH, IMG_HEIGHT = (240, 240)
X, y, p, q = load_data([augmented_yes, augmented_no], (IMG_WIDTH, IMG_HEIGHT))
Y1.jpg
yes
Y10.jpg
yes
Y100.JPG
yes
Y101.jpg
yes
Y102.jpg
yes
Y103.jpg
yes
Y104.jpg
yes
Y105.jpg
yes
Y106.jpg
yes
Y107.jpg
yes
Y108.jpg
yes
Y109.JPG
yes
Y11.jpg
yes
Y111.JPG
yes
Y112.JPG
yes
Y113.JPG
yes
Y114.JPG
yes
Y115.JPG
yes
Y116.JPG
yes
Y117.JPG
yes
Y12.jpg
yes
Y120.JPG
yes
Y13.jpg
yes
Y14.jpg
yes
Y146.JPG
yes
Y147.JPG
yes
Y148.JPG
yes
Y15.jpg
yes
Y153.jpg
yes
Y154.jpg
yes
Y155.JPG
yes
Y156.JPG
yes
Y157.JPG
yes
Y158.JPG
yes
Y159.JPG
yes
Y16.JPG
yes
Y160.JPG
yes
Y161.JPG
yes
Y162.jpg
yes
Y163.JPG
yes
Y164.JPG
yes
Y165.JPG
yes
Y166.JPG
yes
Y167.JPG
yes
Y168.jpg
yes
Y169.jpg
yes
Y17.jpg
yes
Y170.JPG
yes
Y18.JPG
yes
Y180.jpg
yes
Y181.jpg
yes
Y182.JPG
yes
Y183.jpg
yes
Y184.JPG
yes
Y185.jpg
yes
Y186.jpg
yes
Y187.jpg
yes
Y188.jpg
yes
Y19.JPG
yes
Y192.JPG
yes
Y193.JPG
yes
Y194.jpg
yes
Y195.JPG
yes
Y2.jpg
yes
Y20.jpg
yes
Y21.jpg
yes
Y22.jpg
yes
Y23.JPG
yes
Y24.jpg
yes
Y242.JPG
yes
Y243.JPG
yes
Y244.JPG
yes
Y245.jpg
yes
Y246.JPG
yes
Y247.JPG
yes
Y248.JPG
yes
Y249.JPG
yes
Y25.jpg
yes
Y250.jpg
yes
Y251.JPG
yes
Y252.jpg
yes
Y253.JPG
yes
Y254.jpg
yes
Y255.JPG
yes
Y256.JPG
yes
Y257.jpg
yes
Y258.JPG
yes
Y259.JPG
yes
Y26.jpg
yes
Y27.jpg
yes
Y28.jpg
yes
Y29.jpg
yes
Y3.jpg
yes
Y30.jpg
yes
Y31.jpg
yes
Y32.jpg
yes
Y33.jpg
yes
Y34.jpg
yes
Y35.jpg
yes
Y36.JPG
yes
Y37.jpg
yes
Y38.jpg
yes
Y39.jpg
yes
Y4.jpg
yes
Y40.JPG
yes
Y41.jpg
yes
Y42.jpg
yes
Y44.JPG
yes
Y45.JPG
yes
Y46.jpg
yes
Y47.JPG
yes
Y49.JPG
yes
Y50.JPG
yes
Y51.jpg
yes
Y52.jpg
yes
Y53.jpg
yes
Y54.jpg
yes
Y55.jpg
yes
Y56.jpg
yes
Y58.JPG
yes
Y59.JPG
yes
Y6.jpg
yes
Y60.jpg
yes
Y61.jpg
yes
Y62.jpg
yes
Y65.JPG
yes
Y66.JPG
yes
Y67.JPG
yes
Y69.jpg
yes
Y7.jpg
yes
Y70.jpg
yes
Y71.JPG
yes
Y73.jpg
yes
Y74.jpg
yes
Y75.JPG
yes
Y76.jpg
yes
Y77.jpg
yes
Y78.jpg
yes
Y79.jpg
yes
Y8.jpg
yes
Y81.jpg
yes
Y82.jpg
yes
Y85.JPG
yes
Y86.JPG
yes
Y89.JPG
yes
Y9.jpg
yes
Y90.jpg
yes
Y91.jpg
yes
Y92.jpg
yes
Y92.png
yes
Y95.jpg
yes
Y96.jpg
yes
Y97.JPG
yes
Y98.JPG
yes
Y99.JPG
yes
1 no.jpeg
no
10 no.jpg
no
11 no.jpg
no
12 no.jpg
no
13 no.jpg
no
14 no.jpg
no
15 no.jpg
no
17 no.jpg
no
18 no.jpg
no
19 no.jpg
no
2 no.jpeg
no
20 no.jpg
no
21 no.jpg
no
22 no.jpg
no
23 no.jpg
no
24 no.jpg
no
25 no.jpg
no
26 no.jpg
no
27 no.jpg
no
28 no.jpg
no
29 no.jpg
no
3 no.jpg
no
30 no.jpg
no
31 no.jpg
no
32 no.jpg
no
33 no.jpg
no
34 no.jpg
no
35 no.jpg
no
36 no.jpg
no
37 no.jpg
no
38 no.jpg
no
39 no.jpg
no
4 no.jpg
no
40 no.jpg
no
41 no.jpg
no
42 no.jpg
no
43 no.jpg
no
44no.jpg
no
45 no.jpg
no
46 no.jpg
no
47 no.jpg
no
48 no.jpeg
no
49 no.jpg
no
5 no.jpg
no
50 no.jpg
no
6 no.jpg
no
7 no.jpg
no
8 no.jpg
no
9 no.jpg
no
N1.JPG
no
N11.jpg
no
N15.jpg
no
N16.jpg
no
N17.jpg
no
N19.JPG
no
N2.JPG
no
N20.JPG
no
N21.jpg
no
N22.JPG
no
N26.JPG
no
N3.jpg
no
N5.jpg
no
N6.jpg
no
no 1.jpg
no
no 10.jpg
no
no 100.jpg
no
no 2.jpg
no
no 3.jpg
no
no 4.jpg
no
no 5.jpeg
no
no 6.jpg
no
no 7.jpeg
no
no 8.jpg
no
no 89.jpg
no
no 9.png
no
no 90.jpg
no
no 91.jpeg
no
no 92.jpg
no
no 923.jpg
no
no 94.jpg
no
no 95.jpg
no
no 96.jpg
no
no 97.jpg
no
no 98.jpg
no
no 99.jpg
no
no.jpg
no
No11.jpg
no
No12.jpg
no
No13.jpg
no
No14.jpg
no
No15.jpg
no
No16.jpg
no
No17.jpg
no
No18.jpg
no
No19.jpg
no
No20.jpg
no
No21.jpg
no
No22.jpg
no
Number of examples is:253
In [8]:
def plot_sample_images(X, y, n=50):

    for label in [0,1]:
        # grab the first n images with the corresponding y values equal to label
        images = X[np.argwhere(y == label)]
        n_images = images[:n]

        columns_n = 10
        rows_n = int(n/ columns_n)

        plt.figure(figsize=(20, 10))

        i = 1 # current plot        
        for image in n_images:
            plt.subplot(rows_n, columns_n, i)
            plt.imshow(image[0])

            # remove ticks
            plt.tick_params(axis='both', which='both',
                            top=False, bottom=False, left=False, right=False,
                           labelbottom=False, labeltop=False, labelleft=False, labelright=False)

            i += 1

        label_to_str = lambda label: "Yes" if label == 1 else "No"
        plt.suptitle("Brain Tumor: {}".format(label_to_str(label)))
        plt.show()

plot_sample_images(p, q)
In [9]:
def split_data(X, y, test_size=0.2):

    X_train, X_test_val, y_train, y_test_val = train_test_split(X, y, test_size=test_size)
    X_test, X_val, y_test, y_val = train_test_split(X_test_val, y_test_val, test_size=0.5)
    return X_train, y_train, X_val, y_val, X_test, y_test
In [10]:
X_train, y_train, X_val, y_val, X_test, y_test = split_data(X, y, test_size=0.3)
In [11]:
print ("number of training examples = " + str(X_train.shape[0]))
print ("number of development examples = " + str(X_val.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(y_train.shape))
print ("X_val (dev) shape: " + str(X_val.shape))
print ("Y_val (dev) shape: " + str(y_val.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(y_test.shape))
number of training examples = 177
number of development examples = 38
number of test examples = 38
X_train shape: (177, 240, 240, 3)
Y_train shape: (177, 1)
X_val (dev) shape: (38, 240, 240, 3)
Y_val (dev) shape: (38, 1)
X_test shape: (38, 240, 240, 3)
Y_test shape: (38, 1)
In [12]:
def hms_string(sec_elapsed):
    h = int(sec_elapsed / (60 * 60))
    m = int((sec_elapsed % (60 * 60)) / 60)
    s = sec_elapsed % 60
    return "{}:{}:{}".format(h,m,round(s,1))
In [13]:
def compute_f1_score(y_true, prob):
    # convert the vector of probabilities to a target vector
    y_pred = np.where(prob > 0.5, 1, 0)
    score = f1_score(y_true, y_pred)

    return score
In [14]:
def build_model(input_shape):
    # Define the input placeholder as a tensor with shape input_shape. 
    X_input = Input(input_shape)
    X = Conv2D(64, (3, 3), strides = (1, 1), name = 'conv0')(X_input)
    X = BatchNormalization(axis = 3, name = 'bn0')(X)
    X = Activation('relu')(X) # 

    # MAXPOOL
    X = MaxPooling2D((2, 2), name='max_pool0')(X) # 
    # X=Dropout(0.50)(X)


    X = Conv2D(128, (5, 5), strides=(2, 2), name='conv1')(X)
    X = BatchNormalization(axis=3, name='bn1')(X)
    X = Activation('relu')(X)  # shape=(?, 238, 238, 32)
    # MAXPOOL
    X = MaxPooling2D((4, 4), name='max_pool1')(X) # shape=(?, 14, 14, 32)
    X=Dropout(0.50)(X)

    # FLATTEN X 
    X = Flatten()(X) # shape=(?, 6272)
    # FULLYCONNECTED
    X = Dense(1, activation='sigmoid', name='fc')(X) # shape=(?, 1)


    model = Model(inputs = X_input, outputs = X, name='BrainDetectionModel')

    return model
In [15]:
IMG_SHAPE = (IMG_WIDTH, IMG_HEIGHT, 3)
model = build_model(IMG_SHAPE)
model.summary()
Model: "BrainDetectionModel"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 input_1 (InputLayer)        [(None, 240, 240, 3)]     0         
                                                                 
 conv0 (Conv2D)              (None, 238, 238, 64)      1792      
                                                                 
 bn0 (BatchNormalization)    (None, 238, 238, 64)      256       
                                                                 
 activation (Activation)     (None, 238, 238, 64)      0         
                                                                 
 max_pool0 (MaxPooling2D)    (None, 119, 119, 64)      0         
                                                                 
 conv1 (Conv2D)              (None, 58, 58, 128)       204928    
                                                                 
 bn1 (BatchNormalization)    (None, 58, 58, 128)       512       
                                                                 
 activation_1 (Activation)   (None, 58, 58, 128)       0         
                                                                 
 max_pool1 (MaxPooling2D)    (None, 14, 14, 128)       0         
                                                                 
 dropout (Dropout)           (None, 14, 14, 128)       0         
                                                                 
 flatten (Flatten)           (None, 25088)             0         
                                                                 
 fc (Dense)                  (None, 1)                 25089     
                                                                 
=================================================================
Total params: 232,577
Trainable params: 232,193
Non-trainable params: 384
_________________________________________________________________
In [16]:
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])


import time

# tensorboard
log_file_name = 'brain_tumor_detection_cnn_{}'.format(int(time.time()))
tensorboard = TensorBoard(log_dir='logs/{}'.format(log_file_name))




# checkpoint
# unique file name that will include the epoch and the validation (development) accuracy
filepath="cnn-parameters-improvement-{epoch:02d}-{val_accuracy:.2f}"
# save the model with the best validation (development) accuracy till now
checkpoint = ModelCheckpoint("models/{}.model".format(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max'))
# my_callbacks = [
#     tf.keras.callbacks.EarlyStopping(patience=2),
#     tf.keras.callbacks.ModelCheckpoint(filepath='model.{epoch:02d}-{val_loss:.2f}.h5'),
#     tf.keras.callbacks.TensorBoard(log_dir='./logs'),
# ]
In [17]:
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                              patience=5, min_lr=0.0001)
In [18]:
start_time = time.time()

model.fit(x=X_train, y=y_train, batch_size=32, epochs=30, validation_data=(X_val, y_val), callbacks=[reduce_lr,tensorboard, checkpoint])
# history = model.fit(X_train, y_train, validation_split=0.1, epochs=20, callbacks=my_callbacks)

end_time = time.time()
execution_time = (end_time - start_time)
print("Elapsed time: {}".format(hms_string(execution_time)))
Epoch 1/30
6/6 [==============================] - ETA: 0s - loss: 2.7325 - accuracy: 0.5424
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-01-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-01-0.61.model\assets
6/6 [==============================] - 36s 5s/step - loss: 2.7325 - accuracy: 0.5424 - val_loss: 0.6678 - val_accuracy: 0.6053 - lr: 0.0010
Epoch 2/30
6/6 [==============================] - ETA: 0s - loss: 1.0555 - accuracy: 0.6554
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-02-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-02-0.61.model\assets
6/6 [==============================] - 28s 5s/step - loss: 1.0555 - accuracy: 0.6554 - val_loss: 0.6681 - val_accuracy: 0.6053 - lr: 0.0010
Epoch 3/30
6/6 [==============================] - ETA: 0s - loss: 0.5656 - accuracy: 0.7966
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-03-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-03-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.5656 - accuracy: 0.7966 - val_loss: 0.7699 - val_accuracy: 0.6053 - lr: 0.0010
Epoch 4/30
6/6 [==============================] - ETA: 0s - loss: 0.4226 - accuracy: 0.8362
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-04-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-04-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.4226 - accuracy: 0.8362 - val_loss: 0.8067 - val_accuracy: 0.6053 - lr: 0.0010
Epoch 5/30
6/6 [==============================] - ETA: 0s - loss: 0.3376 - accuracy: 0.8701
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-05-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-05-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.3376 - accuracy: 0.8701 - val_loss: 0.6937 - val_accuracy: 0.6053 - lr: 0.0010
Epoch 6/30
6/6 [==============================] - ETA: 0s - loss: 0.3017 - accuracy: 0.8418
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-06-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-06-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.3017 - accuracy: 0.8418 - val_loss: 0.7196 - val_accuracy: 0.6053 - lr: 0.0010
Epoch 7/30
6/6 [==============================] - ETA: 0s - loss: 0.2679 - accuracy: 0.9040
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-07-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-07-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.2679 - accuracy: 0.9040 - val_loss: 0.7898 - val_accuracy: 0.6053 - lr: 2.0000e-04
Epoch 8/30
6/6 [==============================] - ETA: 0s - loss: 0.2465 - accuracy: 0.9040
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-08-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-08-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.2465 - accuracy: 0.9040 - val_loss: 0.7676 - val_accuracy: 0.6053 - lr: 2.0000e-04
Epoch 9/30
6/6 [==============================] - ETA: 0s - loss: 0.2255 - accuracy: 0.9209
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-09-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-09-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.2255 - accuracy: 0.9209 - val_loss: 0.7466 - val_accuracy: 0.6053 - lr: 2.0000e-04
Epoch 10/30
6/6 [==============================] - ETA: 0s - loss: 0.1887 - accuracy: 0.9209
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-10-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-10-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.1887 - accuracy: 0.9209 - val_loss: 0.8414 - val_accuracy: 0.6053 - lr: 2.0000e-04
Epoch 11/30
6/6 [==============================] - ETA: 0s - loss: 0.1672 - accuracy: 0.9322
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-11-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-11-0.61.model\assets
6/6 [==============================] - 24s 4s/step - loss: 0.1672 - accuracy: 0.9322 - val_loss: 0.7974 - val_accuracy: 0.6053 - lr: 2.0000e-04
Epoch 12/30
6/6 [==============================] - ETA: 0s - loss: 0.2169 - accuracy: 0.9266
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-12-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-12-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.2169 - accuracy: 0.9266 - val_loss: 0.8134 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 13/30
6/6 [==============================] - ETA: 0s - loss: 0.1835 - accuracy: 0.9209
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-13-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-13-0.61.model\assets
6/6 [==============================] - 25s 4s/step - loss: 0.1835 - accuracy: 0.9209 - val_loss: 0.8683 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 14/30
6/6 [==============================] - ETA: 0s - loss: 0.1631 - accuracy: 0.9379
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-14-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-14-0.61.model\assets
6/6 [==============================] - 26s 5s/step - loss: 0.1631 - accuracy: 0.9379 - val_loss: 0.8596 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 15/30
6/6 [==============================] - ETA: 0s - loss: 0.1337 - accuracy: 0.9322
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-15-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-15-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.1337 - accuracy: 0.9322 - val_loss: 0.8903 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 16/30
6/6 [==============================] - ETA: 0s - loss: 0.1312 - accuracy: 0.9548
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-16-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-16-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.1312 - accuracy: 0.9548 - val_loss: 0.9339 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 17/30
6/6 [==============================] - ETA: 0s - loss: 0.1457 - accuracy: 0.9322
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-17-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-17-0.61.model\assets
6/6 [==============================] - 25s 4s/step - loss: 0.1457 - accuracy: 0.9322 - val_loss: 0.9713 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 18/30
6/6 [==============================] - ETA: 0s - loss: 0.1625 - accuracy: 0.9266
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-18-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-18-0.61.model\assets
6/6 [==============================] - 25s 4s/step - loss: 0.1625 - accuracy: 0.9266 - val_loss: 1.0321 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 19/30
6/6 [==============================] - ETA: 0s - loss: 0.1603 - accuracy: 0.9492
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-19-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-19-0.61.model\assets
6/6 [==============================] - 25s 4s/step - loss: 0.1603 - accuracy: 0.9492 - val_loss: 0.9848 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 20/30
6/6 [==============================] - ETA: 0s - loss: 0.1107 - accuracy: 0.9661
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-20-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-20-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.1107 - accuracy: 0.9661 - val_loss: 0.9446 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 21/30
6/6 [==============================] - ETA: 0s - loss: 0.1221 - accuracy: 0.9548
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-21-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-21-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.1221 - accuracy: 0.9548 - val_loss: 1.0198 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 22/30
6/6 [==============================] - ETA: 0s - loss: 0.1167 - accuracy: 0.9661
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-22-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-22-0.61.model\assets
6/6 [==============================] - 24s 4s/step - loss: 0.1167 - accuracy: 0.9661 - val_loss: 1.0387 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 23/30
6/6 [==============================] - ETA: 0s - loss: 0.1417 - accuracy: 0.9379
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-23-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-23-0.61.model\assets
6/6 [==============================] - 24s 4s/step - loss: 0.1417 - accuracy: 0.9379 - val_loss: 1.0374 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 24/30
6/6 [==============================] - ETA: 0s - loss: 0.1412 - accuracy: 0.9605
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-24-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-24-0.61.model\assets
6/6 [==============================] - 25s 4s/step - loss: 0.1412 - accuracy: 0.9605 - val_loss: 1.0436 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 25/30
6/6 [==============================] - ETA: 0s - loss: 0.1511 - accuracy: 0.9492
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-25-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-25-0.61.model\assets
6/6 [==============================] - 25s 4s/step - loss: 0.1511 - accuracy: 0.9492 - val_loss: 1.1132 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 26/30
6/6 [==============================] - ETA: 0s - loss: 0.1058 - accuracy: 0.9718
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-26-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-26-0.61.model\assets
6/6 [==============================] - 25s 4s/step - loss: 0.1058 - accuracy: 0.9718 - val_loss: 1.1229 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 27/30
6/6 [==============================] - ETA: 0s - loss: 0.1304 - accuracy: 0.9492
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-27-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-27-0.61.model\assets
6/6 [==============================] - 26s 5s/step - loss: 0.1304 - accuracy: 0.9492 - val_loss: 1.1232 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 28/30
6/6 [==============================] - ETA: 0s - loss: 0.1244 - accuracy: 0.9435
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-28-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-28-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.1244 - accuracy: 0.9435 - val_loss: 1.1104 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 29/30
6/6 [==============================] - ETA: 0s - loss: 0.1054 - accuracy: 0.9548
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-29-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-29-0.61.model\assets
6/6 [==============================] - 27s 5s/step - loss: 0.1054 - accuracy: 0.9548 - val_loss: 1.1542 - val_accuracy: 0.6053 - lr: 1.0000e-04
Epoch 30/30
6/6 [==============================] - ETA: 0s - loss: 0.0880 - accuracy: 0.9661
WARNING:absl:Found untraced functions such as _jit_compiled_convolution_op, _jit_compiled_convolution_op, _update_step_xla while saving (showing 3 of 3). These functions will not be directly callable after loading.
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-30-0.61.model\assets
INFO:tensorflow:Assets written to: models\cnn-parameters-improvement-30-0.61.model\assets
6/6 [==============================] - 26s 4s/step - loss: 0.0880 - accuracy: 0.9661 - val_loss: 1.2341 - val_accuracy: 0.6053 - lr: 1.0000e-04
Elapsed time: 0:13:3.3
In [24]:
history = model.history.history

for key in history.keys():
    print(key)

# val_acc = [0.6552631735801697, 0.7052631735801697, 0.7252631735801697, 0.7852631735801697, 0.7452631735801697, 0.7852631735801697, 0.8052631735801697, 0.7652631735801697, 0.8252631735801697, 0.8152631735801697, 0.8452631735801697, 0.8852631735801697, 0.8752631735801697, 0.9052631735801697, 0.912631735801697, 0.9052631735801697, 0.9352631735801697, 0.9552631735801697, 0.9452631735801697, 0.9552631735801697, 0.9352631735801697, 0.9452631735801697, 0.9252631735801697, 0.9552631735801697, 0.9652631735801697, 0.9452631735801697, 0.9552631735801697, 0.9352631735801697, 0.9452631735801697, 0.9452631735801697]
loss
accuracy
val_loss
val_accuracy
lr
In [26]:
def plot_metrics(history):

    train_loss = history['loss']
    val_loss = history['val_loss']
    train_acc = history['accuracy']
    val_acc = history['val_accuracy']
    

    # Loss
    plt.figure()
    plt.plot(train_loss, label='Training Loss')
    plt.plot(val_loss, label='Validation Loss')
    plt.title('Loss')
    plt.legend()
    plt.show()

    # Accuracy
    plt.figure()
    plt.plot(train_acc, label='Training Accuracy')
    plt.plot(val_acc, label='Validation Accuracy')
    plt.title('Accuracy')
    plt.legend()
    plt.show()

plot_metrics(history)
In [ ]: